Let's speed things up a bit.

First trick: let's be much more efficient when creating the gabors. The general rule: get rid of all the for loops and let numpy operate on whole arrays at once.
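
To see the trick in miniature, here's a toy cell (illustrative only; the offsets and grid are made up) showing how broadcasting replaces a per-item loop. The same pattern shows up below when we subtract each gabor's offset from a shared pixel grid.

In [ ]:
import numpy as np

offsets = np.array([0.0, 0.5, 1.0])   # one offset per "gabor"
grid = np.linspace(-1, 1, 5)          # shared pixel grid

# loop version: shift the grid one offset at a time
rows = np.array([grid - off for off in offsets])

# broadcast version: (3,1) against (5,) gives the whole (3,5) array at once
rows_fast = grid[None, :] - offsets[:, None]

assert np.allclose(rows, rows_fast)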


In [16]:
import numpy as np

def make_gabors(N, width, rng=None):
    if rng is None:
        rng = np.random.RandomState()
    # draw the parameters for all N gabors at once
    lambd = rng.uniform(0.3, 0.8, N)      # wavelength
    theta = rng.uniform(0, 2 * np.pi, N)  # orientation
    psi = rng.uniform(0, 2 * np.pi, N)    # phase
    sigma = rng.uniform(0.2, 0.5, N)      # envelope width
    gamma = rng.uniform(0.4, 0.8, N)      # aspect ratio
    x_offset = rng.uniform(-1, 1, N)
    y_offset = rng.uniform(-1, 1, N)

    # flattened pixel grid, shared by every gabor
    x = np.linspace(-1, 1, width)
    X, Y = np.meshgrid(x, x)
    X.shape = width * width
    Y.shape = width * width

    # broadcast to (N, width*width): one row of pixel coordinates per gabor
    X = X[None, :] - x_offset[:, None]
    Y = Y[None, :] + y_offset[:, None]

    # rotate the coordinates by each gabor's orientation
    cosTheta = np.cos(theta)
    sinTheta = np.sin(theta)
    xTheta = X * cosTheta[:, None] + Y * sinTheta[:, None]
    yTheta = -X * sinTheta[:, None] + Y * cosTheta[:, None]

    # gaussian envelope times sinusoidal carrier
    e = np.exp(-(xTheta**2 + yTheta**2 * gamma[:, None]**2) / (2 * sigma[:, None]**2))
    cos = np.cos(2 * np.pi * xTheta / lambd[:, None] + psi[:, None])
    gabor = e * cos

    # normalize each gabor to unit length
    gabor = gabor / np.linalg.norm(gabor, axis=1)[:, None]
    return gabor

def plot_gabors(data, columns=None):
    if columns is None:
        columns = int(np.sqrt(len(data)))
    pylab.figure(figsize=(10, 10))
    # use a common color scale across all the patches
    vmin = np.min(data)
    vmax = np.max(data)
    width = int(np.sqrt(data.shape[1]))
    for i, d in enumerate(data):
        # grid position for this patch
        w = columns - 1 - (i % columns)
        h = i / columns
        d.shape = width, width
        pylab.imshow(d, extent=(w + 0.025, w + 0.975, h + 0.025, h + 0.975),
                     interpolation='none', vmin=vmin, vmax=vmax, cmap='gray')
        pylab.xticks([])
        pylab.yticks([])
    pylab.xlim((0, columns))
    pylab.ylim((0, len(data) / columns))

In [17]:
import time
start = time.time()
make_gabors(N=10000, width=75)
end = time.time()
print end - start


9.26849198341

The code to generate gabors uses a lot of fun numpy operations, but it turns out they're all single-threaded. So if we're on a machine with multiple cores, we'll only ever use one of them. Since gabor generation still takes a long time, let's parallelize it and make use of those extra cores.


In [18]:
import multiprocessing
from functools import partial

def make_gabors_multi(N, width, processors=None):
    if processors is None:
        processors = multiprocessing.cpu_count()

    pool = multiprocessing.Pool(processors)

    # split the work evenly across processes, rounding up
    N_multi = N / processors
    if N_multi * processors < N:
        N_multi += 1

    # each worker builds its own RandomState (rng defaults to None),
    # so the gabors differ from process to process
    result = pool.map(partial(make_gabors, width=width),
                      [N_multi] * processors)
    pool.close()
    return np.vstack(result)[:N]

In [19]:
import time
start = time.time()
make_gabors_multi(N=10000, width=75)
end = time.time()
print end - start


2.61708498001

Now let's quickly test this by generating a bunch of gabors and plotting them.


In [2]:
a = make_gabors(N=1000, width=50)

plot_gabors(a[:25])
show()


Now for the other speedup. Here's the normal way of computing the SVD:


In [3]:
U, S, V = np.linalg.svd(a.T)
plot(S)
show()


It's a bit silly to compute all those singular values and then throw most of them away. So instead, let's explicitly say how many singular values we want:


In [4]:
import scipy.sparse.linalg
Us, Ss, Vs = scipy.sparse.linalg.svds(a.T, k=300)
plot(Ss[::-1])
show()


Now let's use this to build our model.


In [26]:
N = 100000   # number of neurons
S = 5000     # number of sample points (eval_points)
K = 3        # number of gabors per sample point
width = 75   # width (and height) of the patch
SV = 300     # number of singular values to keep

encoders = make_gabors_multi(N, width)

samples = np.zeros((S, width * width))
for i in range(K):
    samples += make_gabors_multi(S, width)

Since we have to guess how many singular values to keep, we should print out the ratio of the smallest to the largest singular value. If this goes much above 0.01, we should increase SV (the number of singular values to keep).


In [27]:
U, sing_vals, V = scipy.sparse.linalg.svds(encoders.T, k=SV)
basis = U
# (use a new name for the singular values so we don't clobber S,
#  the number of sample points)
print 'SV ratio:', np.min(sing_vals) / np.max(sing_vals)


SV ratio: 0.0185950481894

In [28]:
def compress(original):
    # project image vectors onto the low-dimensional basis
    return np.dot(original, basis)

def uncompress(compressed):
    # map low-dimensional vectors back into image space
    return np.dot(basis, compressed.T).T
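
As a quick sanity check, a freshly generated gabor should come back from a compress/uncompress round trip nearly unchanged, assuming the basis really does capture the space of gabors. Something like this should print a small relative error:

In [ ]:
test = make_gabors(5, width)
recovered = uncompress(compress(test))
print 'relative error:', np.linalg.norm(test - recovered) / np.linalg.norm(test)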

In [29]:
import nengo
neuron_type = nengo.LIFRate()   # default is nengo.LIF()

# the image to represent: a sum of K random gabors
stim_image = make_gabors(K, width)
stim_image = np.sum(stim_image, axis=0)

model = nengo.Network()
with model:
    # present the compressed image for the first 0.1 seconds, then nothing
    stim = nengo.Node(lambda t: compress(stim_image) if t < 0.1 else np.zeros(SV))
    ens = nengo.Ensemble(n_neurons=N, dimensions=SV,
                         encoders=compress(encoders),
                         eval_points=compress(samples),
                         neuron_type=neuron_type)
    # recurrent connection, so the ensemble holds on to the image over time
    conn = nengo.Connection(ens, ens, synapse=0.1)
    nengo.Connection(stim, ens, synapse=None)
    probe = nengo.Probe(ens, synapse=0.01)

In [30]:
sim = nengo.Simulator(model)   # building the simulator solves for the decoders
print 'rmse:', np.linalg.norm(sim.data[conn].solver_info['rmses'])


rmse: 0.139009689036

In [31]:
sim.run(1)

In [32]:
data = uncompress(sim.data[probe])
plot_gabors(data[::-10])   # snapshots of the decoded image over time, most recent first
show()
plot_gabors(np.array([stim_image]))   # the original stimulus, for comparison
show()


